bitkeeper revision 1.1323 (426441f4N_rRW9JqV6OGmWTaHrLiCQ)
author iap10@freefall.cl.cam.ac.uk <iap10@freefall.cl.cam.ac.uk>
Mon, 18 Apr 2005 23:25:40 +0000 (23:25 +0000)
committer iap10@freefall.cl.cam.ac.uk <iap10@freefall.cl.cam.ac.uk>
Mon, 18 Apr 2005 23:25:40 +0000 (23:25 +0000)
- Update FreeBSD sparse tree to build against latest -unstable
- add conditional support for writable pagetables (don't use —
  currently locks up xen)
- block driver not updated to use grant tables yet; DOM0 will have to
  be re-compiled with grant table substrate disabled
Signed-off-by: Kip Macy <kmacy@fsmware.com>
Signed-off-by: ian@xensource.com
freebsd-5.3-xen-sparse/i386-xen/i386-xen/machdep.c
freebsd-5.3-xen-sparse/i386-xen/i386-xen/pmap.c
freebsd-5.3-xen-sparse/i386-xen/i386-xen/xen_machdep.c
freebsd-5.3-xen-sparse/i386-xen/include/hypervisor.h
freebsd-5.3-xen-sparse/i386-xen/include/pmap.h
freebsd-5.3-xen-sparse/i386-xen/include/xenfunc.h
freebsd-5.3-xen-sparse/i386-xen/include/xenpmap.h
freebsd-5.3-xen-sparse/i386-xen/xen/netfront/xn_netfront.c

index 03f24fc170436e73bffc0465f80efef512db3c8e..0d18e7d4c058646391b6869fbfc6a6af6974fffd 100644 (file)
@@ -224,7 +224,6 @@ cpu_startup(void *dummy)
        /*
         * Good {morning,afternoon,evening,night}.
         */
-    /* XXX need to write clock driver */
        startrtclock();
 
        printcpuinfo();
@@ -1375,6 +1374,7 @@ extern unsigned long cpu0prvpage;
 extern unsigned long *SMPpt;
 pteinfo_t *pteinfo_list;
 unsigned long *xen_machine_phys = ((unsigned long *)VADDR(1008, 0));
+pt_entry_t *KPTphysv;
 int preemptable;
 int gdt_set;
 
@@ -1386,6 +1386,10 @@ void
 initvalues(start_info_t *startinfo)
 { 
     int i;
+#ifdef WRITABLE_PAGETABLES
+    HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);
+#endif
+
     xen_start_info = startinfo;
     xen_phys_machine = (unsigned long *)startinfo->mfn_list;
     unsigned long tmpindex = ((__pa(xen_start_info->pt_base) >> PAGE_SHIFT) + xen_start_info->nr_pt_frames) + 3 /* number of pages allocated after the pts + 1*/;
@@ -1393,6 +1397,7 @@ initvalues(start_info_t *startinfo)
     /* pre-zero unused mapped pages */
     bzero((char *)(KERNBASE + (tmpindex << PAGE_SHIFT)), (1024 - tmpindex)*PAGE_SIZE); 
     IdlePTD = (pd_entry_t *)xpmap_ptom(__pa(startinfo->pt_base));
+    KPTphysv = (pt_entry_t *)(startinfo->pt_base + PAGE_SIZE);
     XENPRINTF("IdlePTD %p\n", IdlePTD);
     XENPRINTF("nr_pages: %ld shared_info: 0x%lx flags: 0x%lx pt_base: 0x%lx "
              "mod_start: 0x%lx mod_len: 0x%lx\n",
@@ -1401,9 +1406,9 @@ initvalues(start_info_t *startinfo)
              xen_start_info->mod_start, xen_start_info->mod_len);
     
     /* setup self-referential mapping first so vtomach will work */
-    xpq_queue_pt_update(IdlePTD + PTDPTDI , (unsigned long)IdlePTD | 
+    xen_queue_pt_update(IdlePTD + PTDPTDI , (unsigned long)IdlePTD | 
                        PG_V | PG_A);
-    mcl_flush_queue();
+    xen_flush_queue();
     /* Map proc0's UPAGES */
     proc0uarea = (struct user *)(KERNBASE + (tmpindex << PAGE_SHIFT));
     tmpindex += UAREA_PAGES;
@@ -1431,10 +1436,10 @@ initvalues(start_info_t *startinfo)
     SMPpt[0] = vtomach(cpu0prvpage) | PG_RW | PG_M | PG_V | PG_A;
 
     /* map SMP page table RO */
-    PT_SET_MA(SMPpt, vtomach(SMPpt) & ~PG_RW, TRUE);
+    PT_SET_MA(SMPpt, vtomach(SMPpt) & ~PG_RW);
 
     /* put the page table into the pde */
-    xpq_queue_pt_update(IdlePTD + MPPTDI, xpmap_ptom((tmpindex << PAGE_SHIFT))| PG_M | PG_RW | PG_V | PG_A);
+    xen_queue_pt_update(IdlePTD + MPPTDI, xpmap_ptom((tmpindex << PAGE_SHIFT))| PG_M | PG_RW | PG_V | PG_A);
 
     tmpindex++;
 #endif
@@ -1448,20 +1453,16 @@ initvalues(start_info_t *startinfo)
 #endif
     /* unmap remaining pages from initial 4MB chunk */
     for (i = tmpindex; i%1024 != 0; i++) 
-       PT_CLEAR(KERNBASE + (i << PAGE_SHIFT), TRUE);
+       PT_CLEAR_VA(KPTphysv + i, TRUE);
 
     /* allocate remainder of NKPT pages */
     for (i = 0; i < NKPT-1; i++, tmpindex++)
-       xpq_queue_pt_update(IdlePTD + KPTDI + i + 1, xpmap_ptom((tmpindex << PAGE_SHIFT))| PG_M | PG_RW | PG_V | PG_A);
-    tmpindex += NKPT-1;
-
-
-
+       PT_SET_VA(((unsigned long *)startinfo->pt_base) + KPTDI + i + 1, (tmpindex << PAGE_SHIFT)| PG_M | PG_RW | PG_V | PG_A, TRUE);
     tmpindex += NKPT-1;
     PT_UPDATES_FLUSH();
 
     HYPERVISOR_shared_info = (shared_info_t *)(KERNBASE + (tmpindex << PAGE_SHIFT));
-    PT_SET_MA(HYPERVISOR_shared_info, xen_start_info->shared_info | PG_A | PG_V | PG_RW | PG_M, TRUE);
+    PT_SET_MA(HYPERVISOR_shared_info, xen_start_info->shared_info | PG_A | PG_V | PG_RW | PG_M);
     tmpindex++;
 
     HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list = (unsigned long)xen_phys_machine;
@@ -1568,7 +1569,7 @@ init386(void)
        for (x = 0; x < NGDT; x++)
            ssdtosd(&gdt_segs[x], &gdt[x].sd);
 
-       PT_SET_MA(gdt, *vtopte((unsigned long)gdt) & ~PG_RW, TRUE); 
+       PT_SET_MA(gdt, *vtopte((unsigned long)gdt) & ~PG_RW);
        gdtmachpfn = vtomach(gdt) >> PAGE_SHIFT;
        if (HYPERVISOR_set_gdt(&gdtmachpfn, LAST_RESERVED_GDT_ENTRY + 1)) {
            XENPRINTF("set_gdt failed\n");
@@ -1617,7 +1618,7 @@ init386(void)
        default_proc_ldt.ldt_len = 6;
        _default_ldt = (int)&default_proc_ldt;
        PCPU_SET(currentldt, _default_ldt)
-       PT_SET_MA(ldt, *vtopte((unsigned long)ldt) & ~PG_RW, TRUE);
+       PT_SET_MA(ldt, *vtopte((unsigned long)ldt) & ~PG_RW);
        xen_set_ldt((unsigned long) ldt, (sizeof ldt_segs / sizeof ldt_segs[0]));
 
 
index 9f2ea02f6753817fc9d75470d518297ccaece21b..89705be1af37b7b43d6bc46fa029a31ed34fe9d8 100644 (file)
@@ -793,7 +793,7 @@ pmap_pte(pmap_t pmap, vm_offset_t va)
                newpf = PT_GET(pde) & PG_FRAME;
                tmppf = PT_GET(PMAP2) & PG_FRAME;
                if (tmppf != newpf) {
-                       PT_SET_VA(PMAP2, newpf | PG_V | PG_A, FALSE);
+                       PD_SET_VA(PMAP2, newpf | PG_V | PG_A, FALSE);
                        pmap_invalidate_page(kernel_pmap, (vm_offset_t)PADDR2);
                }
                return (PADDR2 + (i386_btop(va) & (NPTEPG - 1)));
@@ -852,7 +852,7 @@ pmap_pte_quick(pmap_t pmap, vm_offset_t va)
                newpf = PT_GET(pde) & PG_FRAME;
                tmppf = PT_GET(PMAP1) & PG_FRAME;
                if (tmppf != newpf) {
-                       PT_SET_VA(PMAP1, newpf | PG_V | PG_A, TRUE);
+                       PD_SET_VA(PMAP1, newpf | PG_V | PG_A, TRUE);
 #ifdef SMP
                        PMAP1cpu = PCPU_GET(cpuid);
 #endif
@@ -955,7 +955,10 @@ pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
 PMAP_INLINE void 
 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
 {
-       PT_SET(va, pa | PG_RW | PG_V | pgeflag, TRUE);
+       pt_entry_t *pte;
+
+       pte = vtopte(va);
+       pte_store(pte, pa | PG_RW | PG_V | pgeflag);
 }
 
 /*
@@ -965,7 +968,10 @@ pmap_kenter(vm_offset_t va, vm_paddr_t pa)
 PMAP_INLINE void
 pmap_kremove(vm_offset_t va)
 {
-       PT_CLEAR(va, TRUE);
+       pt_entry_t *pte;
+
+       pte = vtopte(va);
+       pte_clear(pte);
 }
 
 /*
@@ -984,12 +990,10 @@ vm_offset_t
 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
 {
        vm_offset_t va, sva;
-       pt_entry_t *pte;
        
        va = sva = *virt;
        while (start < end) {
-               pte = vtopte(va);
-               PT_SET_VA(pte, start | PG_RW | PG_V | pgeflag, FALSE);
+               pmap_kenter(va, start);
                va += PAGE_SIZE;
                start += PAGE_SIZE;
        }
@@ -1016,8 +1020,7 @@ pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
 
        va = sva;
        while (count-- > 0) {
-               PT_SET(va, VM_PAGE_TO_PHYS(*m) | PG_RW | PG_V | pgeflag, 
-                         FALSE);
+               pmap_kenter(va, VM_PAGE_TO_PHYS(*m));
                va += PAGE_SIZE;
                m++;
        }
@@ -1037,7 +1040,7 @@ pmap_qremove(vm_offset_t sva, int count)
 
        va = sva;
        while (count-- > 0) {
-               PT_CLEAR(va, FALSE);
+               pmap_kremove(va);
                va += PAGE_SIZE;
        }
        /* invalidate will flush the update queue */
@@ -1070,8 +1073,8 @@ _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m)
        /*
         * unmap the page table page
         */
-       xpq_queue_unpin_table(pmap->pm_pdir[m->pindex]);
-       PT_CLEAR_VA(&pmap->pm_pdir[m->pindex], TRUE);
+       xen_pt_unpin(pmap->pm_pdir[m->pindex]);
+       PD_CLEAR_VA(&pmap->pm_pdir[m->pindex], TRUE);
        --pmap->pm_stats.resident_count;
 
        /*
@@ -1188,16 +1191,14 @@ pmap_pinit(struct pmap *pmap)
        /* install self-referential address mapping entry(s) */
        for (i = 0; i < NPGPTD; i++) {
                ma = xpmap_ptom(VM_PAGE_TO_PHYS(ptdpg[i]));
-               pmap->pm_pdir[PTDPTDI + i] = ma | PG_V | PG_A;
+               pmap->pm_pdir[PTDPTDI + i] = ma | PG_V | PG_A | PG_M;
 #ifdef PAE
                pmap->pm_pdpt[i] = ma | PG_V;
 #endif
-#ifndef PAE
-               PT_SET_MA(pmap->pm_pdir, ma | PG_V | PG_A, TRUE);  
-#else
-               panic("FIX ME!");
-#endif
-               xpq_queue_pin_table(ma, XPQ_PIN_L2_TABLE);
+               /* re-map page directory read-only */
+               PT_SET_MA(pmap->pm_pdir, ma | PG_V | PG_A);
+               xen_pgd_pin(ma);
+
        }
 
        pmap->pm_active = 0;
@@ -1249,8 +1250,8 @@ _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags)
        pmap->pm_stats.resident_count++;
 
        ptepa = VM_PAGE_TO_PHYS(m);
-       xpq_queue_pin_table(xpmap_ptom(ptepa), XPQ_PIN_L1_TABLE);
-       PT_SET_VA(&pmap->pm_pdir[ptepindex], 
+       xen_pt_pin(xpmap_ptom(ptepa));
+       PD_SET_VA(&pmap->pm_pdir[ptepindex], 
                (pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M), TRUE);
 
        return m;
@@ -1425,12 +1426,12 @@ pmap_release(pmap_t pmap)
                ptdpg[i] = PHYS_TO_VM_PAGE(PT_GET(&pmap->pm_pdir[PTDPTDI + i]));
 
        for (i = 0; i < nkpt + NPGPTD; i++)
-               PT_CLEAR_VA(&pmap->pm_pdir[PTDPTDI + i], FALSE);
+               PD_CLEAR_VA(&pmap->pm_pdir[PTDPTDI + i], FALSE);
 
        bzero(pmap->pm_pdir + PTDPTDI, (nkpt + NPGPTD) *
            sizeof(*pmap->pm_pdir));
 #ifdef SMP
-       PT_CLEAR_VA(&pmap->pm_pdir[MPPTDI], FALSE);
+       PD_CLEAR_VA(&pmap->pm_pdir[MPPTDI], FALSE);
 #endif
        PT_UPDATES_FLUSH();
        pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD);
@@ -1440,8 +1441,7 @@ pmap_release(pmap_t pmap)
                m = ptdpg[i];
                
                ma = xpmap_ptom(VM_PAGE_TO_PHYS(m));
-                xpq_queue_unpin_table(ma);
-               pmap_zero_page(m);
+                xen_pgd_unpin(ma);
 #ifdef PAE
                KASSERT(VM_PAGE_TO_PHYS(m) == (pmap->pm_pdpt[i] & PG_FRAME),
                    ("pmap_release: got wrong ptd page"));
@@ -1516,12 +1516,12 @@ pmap_growkernel(vm_offset_t addr)
                pmap_zero_page(nkpg);
                ptppaddr = VM_PAGE_TO_PHYS(nkpg);
                newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
-               PT_SET_VA(&pdir_pde(PTD, kernel_vm_end), newpdir, TRUE);
+               PD_SET_VA(&pdir_pde(PTD, kernel_vm_end), newpdir, TRUE);
 
                mtx_lock_spin(&allpmaps_lock);
                LIST_FOREACH(pmap, &allpmaps, pm_list) {
                        pde = pmap_pde(pmap, kernel_vm_end);
-                       PT_SET_VA(pde, newpdir, FALSE);
+                       PD_SET_VA(pde, newpdir, FALSE);
                }
                PT_UPDATES_FLUSH();
                mtx_unlock_spin(&allpmaps_lock);
@@ -1738,7 +1738,7 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
                 * Check for large page.
                 */
                if ((ptpaddr & PG_PS) != 0) {
-                       PT_CLEAR_VA(pmap->pm_pdir[pdirindex], TRUE);
+                       PD_CLEAR_VA(pmap->pm_pdir[pdirindex], TRUE);
                        pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
                        anyvalid = 1;
                        continue;
@@ -2222,9 +2222,9 @@ retry:
         * Now validate mapping with RO protection
         */
        if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
-               PT_SET(va, pa | PG_V | PG_U, TRUE);
+               pte_store(pte, pa | PG_V | PG_U);
        else
-               PT_SET(va, pa | PG_V | PG_U | PG_MANAGED, TRUE);
+               pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
 out:
        vm_page_unlock_queues();
        PMAP_UNLOCK(pmap);
@@ -2312,7 +2312,7 @@ retry:
                pmap->pm_stats.resident_count += size >> PAGE_SHIFT;
                npdes = size >> PDRSHIFT;
                for(i = 0; i < npdes; i++) {
-                       PT_SET_VA(&pmap->pm_pdir[ptepindex],
+                       PD_SET_VA(&pmap->pm_pdir[ptepindex],
                            ptepa | PG_U | PG_RW | PG_V | PG_PS, FALSE);
                        ptepa += NBPDR;
                        ptepindex += 1;
@@ -2330,7 +2330,7 @@ pmap_map_readonly(pmap_t pmap, vm_offset_t va, int len)
        for (i = 0; i < npages; i++) {
                pt_entry_t *pte;
                pte = pmap_pte(pmap, (vm_offset_t)(va + i*PAGE_SIZE));
-               PT_SET_MA(va + i*PAGE_SIZE, *pte & ~(PG_RW|PG_M), FALSE);
+               pte_store(pte, xpmap_mtop(*pte & ~(PG_RW|PG_M)));
                PMAP_MARK_PRIV(xpmap_mtop(*pte));
                pmap_pte_release(pte);
        }
@@ -2345,7 +2345,7 @@ pmap_map_readwrite(pmap_t pmap, vm_offset_t va, int len)
                pt_entry_t *pte;
                pte = pmap_pte(pmap, (vm_offset_t)(va + i*PAGE_SIZE));
                PMAP_MARK_UNPRIV(xpmap_mtop(*pte));
-               PT_SET_MA(va + i*PAGE_SIZE, *pte | (PG_RW|PG_M), FALSE);
+               pte_store(pte, xpmap_mtop(*pte) | (PG_RW|PG_M));
                pmap_pte_release(pte);
        }
        PT_UPDATES_FLUSH();
@@ -3010,7 +3010,7 @@ pmap_mapdev(pa, size)
                panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
 
        for (tmpva = va; size > 0; ) {
-               PT_SET(tmpva, pa | PG_RW | PG_V | pgeflag, FALSE);
+               pmap_kenter(tmpva, pa);
                size -= PAGE_SIZE;
                tmpva += PAGE_SIZE;
                pa += PAGE_SIZE;
@@ -3032,7 +3032,7 @@ pmap_unmapdev(va, size)
        offset = va & PAGE_MASK;
        size = roundup(offset + size, PAGE_SIZE);
        for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE)
-               PT_CLEAR(tmpva, FALSE);
+               pmap_kremove(tmpva);
        pmap_invalidate_range(kernel_pmap, va, tmpva);
        kmem_free(kernel_map, base, size);
 }
index 1dcd9448d3ef44dbfd1b350a7138806933d92e99..b16edb6534435fc0a47f6caf854cbf4be3f3e345 100644 (file)
@@ -1,8 +1,7 @@
-/*     $NetBSD:$       */
-
 /*
  *
  * Copyright (c) 2004 Christian Limpach.
+ * Copyright (c) 2004,2005 Kip Macy
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -381,39 +380,31 @@ printk(const char *fmt, ...)
         (void)HYPERVISOR_console_write(buf, ret);
 }
 
-#define XPQUEUE_SIZE 128
+#define PANIC_IF(exp) if (unlikely(exp)) {printk("%s failed\n",#exp); panic("%s: %s:%d", #exp, __FILE__, __LINE__);} 
 
-#define MCLQUEUE_SIZE 32
+
+#define XPQUEUE_SIZE 128
 #ifdef SMP
 /* per-cpu queues and indices */
-static multicall_entry_t mcl_queue[MAX_VIRT_CPUS][MCLQUEUE_SIZE];
 static mmu_update_t xpq_queue[MAX_VIRT_CPUS][XPQUEUE_SIZE];
-static int mcl_idx[MAX_VIRT_CPUS];  
 static int xpq_idx[MAX_VIRT_CPUS];  
 
-#define MCL_QUEUE mcl_queue[vcpu]
 #define XPQ_QUEUE xpq_queue[vcpu]
-#define MCL_IDX mcl_idx[vcpu]
 #define XPQ_IDX xpq_idx[vcpu]
 #define SET_VCPU() int vcpu = smp_processor_id()
 #else
-static multicall_entry_t mcl_queue[MCLQUEUE_SIZE];
 static mmu_update_t xpq_queue[XPQUEUE_SIZE];
-static int mcl_idx = 0;
 static int xpq_idx = 0;
 
-#define MCL_QUEUE mcl_queue
 #define XPQ_QUEUE xpq_queue
-#define MCL_IDX mcl_idx
 #define XPQ_IDX xpq_idx
 #define SET_VCPU()
 #endif
 #define XPQ_IDX_INC atomic_add_int(&XPQ_IDX, 1);
-#define MCL_IDX_INC atomic_add_int(&MCL_IDX, 1);
 
 
 static __inline void
-_xpq_flush_queue(void)
+_xen_flush_queue(void)
 {
     SET_VCPU();
     int _xpq_idx = XPQ_IDX;
@@ -423,7 +414,7 @@ _xpq_flush_queue(void)
     XPQ_IDX = 0;
     /* Make sure index is cleared first to avoid double updates. */
     error = HYPERVISOR_mmu_update((mmu_update_t *)&XPQ_QUEUE,
-                                 _xpq_idx, NULL);
+                                 _xpq_idx, NULL, DOMID_SELF);
     
     if (__predict_false(error < 0)) {
        for (i = 0; i < _xpq_idx; i++)
@@ -432,81 +423,44 @@ _xpq_flush_queue(void)
     }
 
 }
-static void
-xpq_flush_queue(void)
-{
-    SET_VCPU();
-
-    if (XPQ_IDX != 0) _xpq_flush_queue();
-}
-
-static __inline void
-_mcl_flush_queue(void)
-{
-    SET_VCPU();
-    int _mcl_idx = MCL_IDX;
-
-    MCL_IDX = 0;
-    (void)HYPERVISOR_multicall(&MCL_QUEUE, _mcl_idx);
-}
 
 void
-mcl_flush_queue(void)
+xen_flush_queue(void)
 {
-    
-    if (__predict_true(mcl_idx != 0)) _mcl_flush_queue();
-    /* XXX: until we can remove the  pervasive 
-     * __HYPERVISOR_update_va_mapping calls, we have 2 queues.  In order
-     * to ensure that they never get out of sync, only 1 flush interface
-     * is provided.
-     */
-    xpq_flush_queue();
+    SET_VCPU();
+    if (XPQ_IDX != 0) _xen_flush_queue();
 }
 
-
 static __inline void
-xpq_increment_idx(void)
+xen_increment_idx(void)
 {
     SET_VCPU();
 
     XPQ_IDX++;
     if (__predict_false(XPQ_IDX == XPQUEUE_SIZE))
-       xpq_flush_queue();
-}
-
-static __inline void
-mcl_increment_idx(void)
-{
-    SET_VCPU();
-    MCL_IDX++;
-
-    if (__predict_false(MCL_IDX == MCLQUEUE_SIZE))
-       mcl_flush_queue();
+       xen_flush_queue();
 }
 
 void
-xpq_queue_invlpg(vm_offset_t va)
+xen_invlpg(vm_offset_t va)
 {
-    SET_VCPU();
-    
-    XPQ_QUEUE[XPQ_IDX].ptr = (va & ~PAGE_MASK) | MMU_EXTENDED_COMMAND;
-    XPQ_QUEUE[XPQ_IDX].val = MMUEXT_INVLPG;
-    xpq_increment_idx();
+    struct mmuext_op op;
+    op.cmd = MMUEXT_INVLPG_LOCAL;
+    op.linear_addr = va & ~PAGE_MASK;
+    xen_flush_queue();
+    PANIC_IF(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void
 load_cr3(uint32_t val)
 {
-    xpq_queue_pt_switch(val);
-    xpq_flush_queue();
+    struct mmuext_op op;
+    op.cmd = MMUEXT_NEW_BASEPTR;
+    op.mfn = xpmap_ptom(val) >> PAGE_SHIFT;
+    xen_flush_queue();
+    PANIC_IF(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
-void
-xen_set_ldt(vm_offset_t base, uint32_t entries)
-{
-    xpq_queue_set_ldt(base, entries);
-    _xpq_flush_queue();
-}
 
 void
 xen_machphys_update(unsigned long mfn, unsigned long pfn)
@@ -515,97 +469,77 @@ xen_machphys_update(unsigned long mfn, unsigned long pfn)
     
     XPQ_QUEUE[XPQ_IDX].ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
     XPQ_QUEUE[XPQ_IDX].val = pfn;
-    xpq_increment_idx();
-    _xpq_flush_queue();
+    xen_increment_idx();
+    _xen_flush_queue();
 }
 
 void
-xpq_queue_pt_update(pt_entry_t *ptr, pt_entry_t val)
+xen_queue_pt_update(pt_entry_t *ptr, pt_entry_t val)
 {
     SET_VCPU();
     
     XPQ_QUEUE[XPQ_IDX].ptr = (memory_t)ptr;
     XPQ_QUEUE[XPQ_IDX].val = (memory_t)val;
-    xpq_increment_idx();
+    xen_increment_idx();
 }
 
 void 
-mcl_queue_pt_update(vm_offset_t va, vm_paddr_t ma)
+xen_pgd_pin(unsigned long ma)
 {
-#if 0
-    printf("setting va %x to ma %x\n", va, ma); 
-#endif
-    SET_VCPU();
-    
-    MCL_QUEUE[MCL_IDX].op = __HYPERVISOR_update_va_mapping;
-    MCL_QUEUE[MCL_IDX].args[0] = (unsigned long)va;
-    MCL_QUEUE[MCL_IDX].args[1] = (unsigned long)ma;
-    MCL_QUEUE[MCL_IDX].args[2] = UVMF_INVLPG|UVMF_LOCAL;
-    mcl_increment_idx();
+    struct mmuext_op op;
+    op.cmd = MMUEXT_PIN_L2_TABLE;
+    op.mfn = ma >> PAGE_SHIFT;
+    xen_flush_queue();
+    PANIC_IF(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
-
-
-void
-xpq_queue_pt_switch(uint32_t val)
+void 
+xen_pgd_unpin(unsigned long ma)
 {
-    vm_paddr_t ma = xpmap_ptom(val) & PG_FRAME;
-    SET_VCPU();
-    
-    XPQ_QUEUE[XPQ_IDX].ptr = ma | MMU_EXTENDED_COMMAND;
-    XPQ_QUEUE[XPQ_IDX].val = MMUEXT_NEW_BASEPTR;
-    xpq_increment_idx();
+    struct mmuext_op op;
+    op.cmd = MMUEXT_UNPIN_TABLE;
+    op.mfn = ma >> PAGE_SHIFT;
+    xen_flush_queue();
+    PANIC_IF(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
-
-void
-xpq_queue_pin_table(uint32_t pa, int type)
+void 
+xen_pt_pin(unsigned long ma)
 {
-    SET_VCPU();
-    
-    
-    XPQ_QUEUE[XPQ_IDX].ptr = pa | MMU_EXTENDED_COMMAND;
-    switch (type) {
-    case XPQ_PIN_L1_TABLE:
-       XPQ_QUEUE[XPQ_IDX].val = MMUEXT_PIN_L1_TABLE;
-       break;
-    case XPQ_PIN_L2_TABLE:
-       XPQ_QUEUE[XPQ_IDX].val = MMUEXT_PIN_L2_TABLE;
-       break;
-    }
-    xpq_increment_idx();
+    struct mmuext_op op;
+    op.cmd = MMUEXT_PIN_L1_TABLE;
+    op.mfn = ma >> PAGE_SHIFT;
+    xen_flush_queue();
+    PANIC_IF(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
-void
-xpq_queue_unpin_table(uint32_t pa)
+void 
+xen_pt_unpin(unsigned long ma)
 {
-    SET_VCPU();
-    
-    XPQ_QUEUE[XPQ_IDX].ptr = pa | MMU_EXTENDED_COMMAND;
-    XPQ_QUEUE[XPQ_IDX].val = MMUEXT_UNPIN_TABLE;
-    xpq_increment_idx();
+    struct mmuext_op op;
+    op.cmd = MMUEXT_UNPIN_TABLE;
+    op.mfn = ma >> PAGE_SHIFT;
+    xen_flush_queue();
+    PANIC_IF(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
-void
-xpq_queue_set_ldt(vm_offset_t va, uint32_t entries)
-{
-    SET_VCPU();
-    
-    KASSERT(va == (va & PG_FRAME), ("ldt not page aligned"));
-    XPQ_QUEUE[XPQ_IDX].ptr = MMU_EXTENDED_COMMAND | va;
-    XPQ_QUEUE[XPQ_IDX].val = MMUEXT_SET_LDT |
-       (entries << MMUEXT_CMD_SHIFT);
-    xpq_increment_idx();
+void 
+xen_set_ldt(unsigned long ptr, unsigned long len)
+{
+    struct mmuext_op op;
+    op.cmd = MMUEXT_SET_LDT;
+    op.linear_addr = ptr;
+    op.nr_ents = len;
+    xen_flush_queue();
+    PANIC_IF(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
-void
-xpq_queue_tlb_flush()
+void xen_tlb_flush(void)
 {
-    SET_VCPU();
-    
-    XPQ_QUEUE[XPQ_IDX].ptr = MMU_EXTENDED_COMMAND;
-    XPQ_QUEUE[XPQ_IDX].val = MMUEXT_TLB_FLUSH;
-    xpq_increment_idx();
+    struct mmuext_op op;
+    op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
+    xen_flush_queue();
+    PANIC_IF(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 
index e77f5d6b9145eef4f431bd6f413c39daa2c84927..cfae32112915429c530bd9957a1af34f76f92582 100644 (file)
@@ -49,19 +49,40 @@ static inline int HYPERVISOR_set_trap_table(trap_info_t *table)
     return ret;
 }
 
-static inline int HYPERVISOR_mmu_update(mmu_update_t *req, 
-                                       int count,
-                                       int *success_count)
+static inline int 
+HYPERVISOR_mmu_update(mmu_update_t *req, int count, 
+                     int *success_count, domid_t domid)
 {
     int ret;
+    unsigned long ign1, ign2, ign3, ign4;
     __asm__ __volatile__ (
         TRAP_INSTR
-        : "=a" (ret) : "0" (__HYPERVISOR_mmu_update), 
-        "b" (req), "c" (count), "d" (success_count) : "memory" );
+        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
+        : "0" (__HYPERVISOR_mmu_update), "1" (req), "2" (count),
+        "3" (success_count), "4" (domid)
+        : "memory" );
+
+    return ret;
+}
+
+static inline int
+HYPERVISOR_mmuext_op(
+                    struct mmuext_op *op, int count, int *success_count, domid_t domid)
+{
+    int ret;
+    unsigned long ign1, ign2, ign3, ign4;
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
+        : "0" (__HYPERVISOR_mmuext_op), "1" (op), "2" (count),
+        "3" (success_count), "4" (domid)
+        : "memory" );
 
     return ret;
 }
 
+
+
 static inline int HYPERVISOR_set_gdt(unsigned long *frame_list, int entries)
 {
     int ret;
@@ -262,13 +283,13 @@ static inline int HYPERVISOR_multicall(void *call_list, int nr_calls)
 }
 
 static inline int HYPERVISOR_update_va_mapping(
-    unsigned long page_nr, pte_t new_val, unsigned long flags)
+    unsigned long page_nr, unsigned long new_val, unsigned long flags)
 {
     int ret;
     __asm__ __volatile__ (
         TRAP_INSTR
         : "=a" (ret) : "0" (__HYPERVISOR_update_va_mapping), 
-       "b" (page_nr), "c" ((new_val).pte_low), "d" (flags):
+       "b" (page_nr), "c" (new_val), "d" (flags):
        "memory" );
     /* XXX */
 #if 0
index a7596b0e91b4cde4a8e1f89e08e9c8bf1f01939c..c067a1996913fd78e410ca2850f56627df6e527e 100644 (file)
@@ -253,8 +253,8 @@ pte_load_store(pt_entry_t *ptep, pt_entry_t v)
        return (r);
 }
 
-#define        pte_store(ptep, pte)    PT_SET_VA_MA(ptep, pte, TRUE);
-#define pte_clear(pte)          PT_CLEAR_VA(pte, TRUE);
+#define        pte_store(ptep, pte)    PT_SET_VA(ptep, pte, TRUE)
+#define pte_clear(pte)          PT_CLEAR_VA(pte, TRUE)
 
 
 #endif /* _KERNEL */
index 93ffd7853afe097a2b9d1774a8c89f48e27aed95..bd35cf2ff156fe7394830045aded2fb228352ae9 100644 (file)
@@ -1,8 +1,7 @@
-/*     $NetBSD:$       */
-
 /*
  *
  * Copyright (c) 2004 Christian Limpach.
+ * Copyright (c) 2004,2005 Kip Macy
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -59,7 +58,6 @@ extern pteinfo_t *pteinfo_list;
 char *xen_setbootenv(char *cmd_line);
 int xen_boothowto(char *envp);
 void load_cr3(uint32_t val);
-void xen_set_ldt(vm_offset_t, uint32_t);
 void xen_machphys_update(unsigned long, unsigned long);
 void xen_update_descriptor(union descriptor *, union descriptor *);
 void lldt(u_short sel);
@@ -71,14 +69,14 @@ void lldt(u_short sel);
 static __inline void
 invlpg(u_int addr)
 {
-       xpq_queue_invlpg(addr);
+       xen_invlpg(addr);
 }
 
 static __inline void
 invltlb(void)
 {
-       xpq_queue_tlb_flush();
-       mcl_flush_queue();
+       xen_tlb_flush();
+       
 }
 
 
index f445096228622dc939cbf27f6ad90519e638cbed..2149d5dc483024af60bc33b06d53654e44c4284b 100644 (file)
@@ -1,8 +1,7 @@
-/*     $NetBSD:$       */
-
 /*
  *
  * Copyright (c) 2004 Christian Limpach.
+ * Copyright (c) 2004,2005 Kip Macy
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #ifndef _XEN_XENPMAP_H_
 #define _XEN_XENPMAP_H_
 #include <machine/xenvar.h>
-void xpq_physbcopy(const unsigned long *, unsigned long, size_t);
-void xpq_queue_invlpg(vm_offset_t);
-void xpq_queue_pt_update(pt_entry_t *, pt_entry_t);
-void xpq_queue_pt_switch(uint32_t);
-void xpq_queue_set_ldt(vm_offset_t, uint32_t);
-void xpq_queue_tlb_flush(void);
-void xpq_queue_pin_table(uint32_t, int);
-void xpq_queue_unpin_table(uint32_t);
-void xpq_record(unsigned long, unsigned long);
-void mcl_queue_pt_update(vm_offset_t, vm_offset_t);
-void mcl_flush_queue(void);
+void xen_invlpg(vm_offset_t);
+void xen_queue_pt_update(pt_entry_t *, pt_entry_t);
+void xen_pt_switch(uint32_t);
+void xen_set_ldt(unsigned long, unsigned long);
+void xen_tlb_flush(void);
+void xen_pgd_pin(unsigned long);
+void xen_pgd_unpin(unsigned long);
+void xen_pt_pin(unsigned long);
+void xen_pt_unpin(unsigned long);
+void xen_flush_queue(void);
 void pmap_ref(pt_entry_t *pte, unsigned long ma);
 
 
@@ -62,57 +60,76 @@ void pmap_ref(pt_entry_t *pte, unsigned long ma);
 #endif
 
 #define ALWAYS_SYNC 0
-
 #define pmap_valid_entry(E)           ((E) & PG_V) /* is PDE or PTE valid? */
 
-#define        XPQ_PIN_L1_TABLE 1
-#define        XPQ_PIN_L2_TABLE 2
-
 #define        PT_GET(_ptp)                                            \
        (pmap_valid_entry(*(_ptp)) ? xpmap_mtop(*(_ptp)) : *(_ptp))
+
+#ifdef WRITABLE_PAGETABLES
 #define PT_SET_VA(_ptp,_npte,sync) do {                                \
         PMAP_REF((_ptp), xpmap_ptom(_npte));                    \
-       xpq_queue_pt_update((pt_entry_t *)vtomach((_ptp)),      \
-                           xpmap_ptom((_npte)));               \
-       if (sync || ALWAYS_SYNC)                                \
-               mcl_flush_queue();                              \
+        *(_ptp) = xpmap_ptom((_npte));                            \
 } while (/*CONSTCOND*/0)
 #define PT_SET_VA_MA(_ptp,_npte,sync) do {                 \
         PMAP_REF((_ptp), (_npte));                              \
-       xpq_queue_pt_update((pt_entry_t *)vtomach((_ptp)), (_npte)); \
-       if (sync || ALWAYS_SYNC)                                \
-               mcl_flush_queue();                              \
+        *(_ptp) = (_npte);                                          \
 } while (/*CONSTCOND*/0)
 #define PT_CLEAR_VA(_ptp, sync) do {                           \
         PMAP_REF((pt_entry_t *)(_ptp), 0);                      \
-       xpq_queue_pt_update((pt_entry_t *)vtomach(_ptp), 0);    \
-       if (sync || ALWAYS_SYNC)                                \
-               mcl_flush_queue();                              \
+        *(_ptp) = 0;                                              \
 } while (/*CONSTCOND*/0)
-#define PT_CLEAR(_ptp, sync) do {                               \
-        PMAP_REF((pt_entry_t *)(vtopte(_ptp)), 0);              \
-        mcl_queue_pt_update((unsigned long)_ptp, 0);            \
-        if (sync || ALWAYS_SYNC)                                \
-               mcl_flush_queue();                               \
+
+#define PD_SET_VA(_ptp,_npte,sync) do {                                \
+        PMAP_REF((_ptp), xpmap_ptom(_npte));                    \
+       xen_queue_pt_update((pt_entry_t *)vtomach((_ptp)),      \
+                           xpmap_ptom((_npte)));               \
+       if (sync || ALWAYS_SYNC) xen_flush_queue();             \
 } while (/*CONSTCOND*/0)
-#define PT_SET_MA(_va,_ma,sync) do {                           \
-        PMAP_REF(vtopte((unsigned long)_va), (_ma));            \
-       mcl_queue_pt_update((vm_offset_t )(_va), (_ma)); \
-       if (sync || ALWAYS_SYNC)                                \
-               mcl_flush_queue();                              \
+#define PD_SET_VA_MA(_ptp,_npte,sync) do {                 \
+        PMAP_REF((_ptp), (_npte));                              \
+       xen_queue_pt_update((pt_entry_t *)vtomach((_ptp)), (_npte)); \
+       if (sync || ALWAYS_SYNC) xen_flush_queue();             \
+} while (/*CONSTCOND*/0)
+#define PD_CLEAR_VA(_ptp, sync) do {                           \
+        PMAP_REF((pt_entry_t *)(_ptp), 0);                      \
+       xen_queue_pt_update((pt_entry_t *)vtomach(_ptp), 0);    \
+       if (sync || ALWAYS_SYNC) xen_flush_queue();             \
+} while (/*CONSTCOND*/0)
+
+
+#else /* !WRITABLE_PAGETABLES */
+
+#define PT_SET_VA(_ptp,_npte,sync) do {                                \
+        PMAP_REF((_ptp), xpmap_ptom(_npte));                    \
+       xen_queue_pt_update((pt_entry_t *)vtomach(_ptp),        \
+                           xpmap_ptom(_npte));                 \
+       if (sync || ALWAYS_SYNC) xen_flush_queue();             \
 } while (/*CONSTCOND*/0)
-#define PT_SET(_va,_pa,sync) do {                              \
-        PMAP_REF((pt_entry_t *)(vtopte(_va)), xpmap_ptom(_pa)); \
-       mcl_queue_pt_update((vm_offset_t)(_va),                 \
-                           xpmap_ptom((_pa)));                 \
+#define PT_SET_VA_MA(_ptp,_npte,sync) do {                 \
+        PMAP_REF((_ptp), (_npte));                              \
+       xen_queue_pt_update((pt_entry_t *)vtomach(_ptp), _npte); \
+       if (sync || ALWAYS_SYNC) xen_flush_queue();             \
+} while (/*CONSTCOND*/0)
+#define PT_CLEAR_VA(_ptp, sync) do {                           \
+        PMAP_REF((pt_entry_t *)(_ptp), 0);                      \
+       xen_queue_pt_update((pt_entry_t *)vtomach(_ptp), 0);    \
        if (sync || ALWAYS_SYNC)                                \
-               mcl_flush_queue();                              \
+               xen_flush_queue();                              \
 } while (/*CONSTCOND*/0)
 
+#define PD_SET_VA    PT_SET_VA
+#define PD_SET_VA_MA PT_SET_VA_MA
+#define PD_CLEAR_VA  PT_CLEAR_VA
+
+#endif
 
+#define PT_SET_MA(_va, _ma) \
+   HYPERVISOR_update_va_mapping(((unsigned long)_va),  \
+                                ((unsigned long)_ma), \
+                                UVMF_INVLPG| UVMF_LOCAL)\
 
 #define        PT_UPDATES_FLUSH() do {                                 \
-        mcl_flush_queue();                                      \
+        xen_flush_queue();                                      \
 } while (/*CONSTCOND*/0)
 
 
index 1de71545fb500b5f0352dec0bd31ed03cfe277f5..33f1b336c5c17c072f8cf824f6037796f9579e5a 100644 (file)
@@ -444,7 +444,7 @@ xn_alloc_rx_buffers(struct xn_softc *sc)
 
     /* Give away a batch of pages. */
     xn_rx_mcl[i].op = __HYPERVISOR_dom_mem_op;
-    xn_rx_mcl[i].args[0] = (unsigned long) MEMOP_decrease_reservation;
+    xn_rx_mcl[i].args[0] = MEMOP_decrease_reservation;
     xn_rx_mcl[i].args[1] = (unsigned long)xn_rx_pfns;
     xn_rx_mcl[i].args[2] = (unsigned long)i;
     xn_rx_mcl[i].args[3] = 0;
@@ -454,7 +454,7 @@ xn_alloc_rx_buffers(struct xn_softc *sc)
     (void)HYPERVISOR_multicall(xn_rx_mcl, i+1);
 
     /* Check return status of HYPERVISOR_dom_mem_op(). */
-    if ( xn_rx_mcl[i].args[5] != i )
+    if (unlikely(xn_rx_mcl[i].args[5] != i))
         panic("Unable to reduce memory reservation\n");
 
     /* Above is a suitable barrier to ensure backend will see requests. */
@@ -544,6 +544,7 @@ xn_rxeof(struct xn_softc *sc)
        mcl->args[0] = (unsigned long)xn_rx_mmu;
        mcl->args[1] = mmu - xn_rx_mmu;
        mcl->args[2] = 0;
+       mcl->args[3] = DOMID_SELF;
        mcl++;
        (void)HYPERVISOR_multicall(xn_rx_mcl, mcl - xn_rx_mcl);
     }